System Logs va journalctl
System logging - bu Linux tizimlarida troubleshooting, monitoring va auditing uchun eng muhim vosita. Zamonaviy Linux distributionlarda systemd-journald va traditional syslog bir vaqtda ishlaydi.
Linux Logging Architecture
Logging Components
# systemd-journald - Zamonaviy logging daemon
# rsyslog/syslog-ng - Traditional syslog daemon
# kernel ring buffer - Kernel messages
# application logs - Dastur-specific log'lar
# Log data flow:
# Applications -> systemd-journald -> persistent storage
# -> rsyslog -> /var/log/
# -> other log processors
Log Locations
# systemd journal
/var/log/journal/ # Persistent journal storage
/run/log/journal/ # Volatile journal storage
# Traditional log files
/var/log/syslog # System messages (Ubuntu/Debian)
/var/log/messages # System messages (CentOS/RHEL)
/var/log/auth.log # Authentication logs
/var/log/kern.log # Kernel messages
/var/log/boot.log # Boot messages
/var/log/dmesg # Kernel ring buffer
/var/log/mail.log # Mail system logs
/var/log/apache2/ # Apache web server logs
/var/log/nginx/ # Nginx web server logs
journalctl - systemd Log Viewer
Basic journalctl Usage
# View all journal entries
journalctl # All logs (paged)
journalctl --no-pager # All logs without pager
# Recent logs
journalctl -n 20 # Last 20 entries
journalctl -n 50 # Last 50 entries
journalctl --since today # Today's logs
journalctl --since yesterday # Yesterday's logs
# Follow logs (real-time)
journalctl -f # Follow all logs
journalctl -f -n 10 # Follow with last 10 entries
# Boot logs
journalctl -b # Current boot logs
journalctl -b -1 # Previous boot logs
journalctl --list-boots # Available boot logs
Time-based Filtering
# Specific time ranges
journalctl --since "2023-12-01"
journalctl --since "2023-12-01 10:00:00"
journalctl --since "1 hour ago"
journalctl --since "30 minutes ago"
journalctl --since "yesterday"
journalctl --since "1 week ago"
# Time ranges
journalctl --since "2023-12-01" --until "2023-12-02"
journalctl --since "09:00" --until "17:00"
journalctl --since yesterday --until today
# Relative time
journalctl --since "-2 hours"
journalctl --since "-1 day"
journalctl --since "-1 week"
Service and Unit Filtering
# Service-specific logs
journalctl -u nginx # Nginx service logs
journalctl -u ssh # SSH service logs
journalctl -u docker # Docker service logs
journalctl -u systemd-networkd # Network service logs
# Multiple services
journalctl -u nginx -u apache2 # Multiple services
journalctl -u nginx.service # Full service name
# Follow service logs
journalctl -u nginx -f # Follow nginx logs
journalctl -u nginx -f -n 20 # Follow with last 20 entries
# User services
journalctl --user # User session logs
journalctl --user -u application # User service logs
Priority and Severity Filtering
# Priority levels (syslog compatible):
# 0: emerg (emergency)
# 1: alert
# 2: crit (critical)
# 3: err (error)
# 4: warning
# 5: notice
# 6: info
# 7: debug
# Filter by priority
journalctl -p err # Error and above (0-3)
journalctl -p warning # Warning and above (0-4)
journalctl -p info # Info and above (0-6)
journalctl -p debug # All messages (0-7)
# Specific priority only
journalctl -p err..err # Only error messages
journalctl -p warning..warning # Only warning messages
# Combined with other filters
journalctl -u nginx -p err # Nginx errors only
journalctl --since today -p crit # Today's critical messages
Advanced Filtering
# Process ID filtering
journalctl _PID=1234 # Specific process ID
journalctl _COMM=nginx # Process command name
journalctl _EXE=/usr/sbin/nginx # Executable path
# User filtering
journalctl _UID=1000 # Specific user ID
journalctl _GID=100 # Specific group ID
# System component filtering
journalctl _SYSTEMD_UNIT=nginx.service # Systemd unit
journalctl _KERNEL_DEVICE=sda1 # Kernel device
journalctl _HOSTNAME=server01 # Specific hostname
# Facility filtering (syslog)
journalctl SYSLOG_FACILITY=4 # auth (security/authorization messages)
journalctl SYSLOG_FACILITY=10 # authpriv (private security/authorization)
# Combined filters
journalctl _SYSTEMD_UNIT=nginx.service --since "1 hour ago" -p warning
Output Formatting
# Output formats
journalctl -o short # Default format
journalctl -o verbose # Verbose format with all fields
journalctl -o json # JSON format
journalctl -o json-pretty # Pretty JSON format
journalctl -o cat # Message text only
journalctl -o export # Binary export format
# Custom field display
journalctl -o verbose | grep PRIORITY
journalctl -o json | jq '._HOSTNAME'
# No truncation
journalctl --full # Don't truncate (ellipsize) long lines
journalctl -l # Same as --full
# Reverse order
journalctl -r # Reverse chronological order
journalctl -r -n 20 # Last 20 in reverse order
Journal Configuration
Journal Storage Configuration
# Journal configuration file
cat /etc/systemd/journald.conf
# Key settings:
Storage=persistent # Store logs on disk
Storage=volatile # Store logs in RAM only
Storage=auto # Automatic (persistent if /var/log/journal exists)
Storage=none # Disable journal storage
SystemMaxUse=1G # Maximum disk usage
SystemKeepFree=500M # Keep free disk space
SystemMaxFileSize=100M # Maximum single file size
SystemMaxFiles=100 # Maximum number of files
RuntimeMaxUse=100M # Maximum RAM usage
RuntimeKeepFree=50M # Keep free RAM
RuntimeMaxFileSize=50M # Maximum runtime file size
RuntimeMaxFiles=10 # Maximum runtime files
MaxRetentionSec=1month # Maximum retention time
Journal Maintenance
# Check journal disk usage
journalctl --disk-usage # Current disk usage
du -sh /var/log/journal/ # Directory size
# Vacuum (clean old logs)
sudo journalctl --vacuum-time=30d # Keep only 30 days
sudo journalctl --vacuum-size=1G # Keep only 1GB
sudo journalctl --vacuum-files=10 # Keep only 10 files
# Verify journal integrity
sudo journalctl --verify # Verify all journal files
sudo journalctl --verify --file=/var/log/journal/*/system.journal
# Rotate journal files
sudo systemctl kill --kill-who=main --signal=SIGUSR2 systemd-journald
# Flush runtime journal to persistent storage (or: journalctl --flush)
sudo systemctl kill --kill-who=main --signal=SIGUSR1 systemd-journald
# Sync all unwritten journal data to disk (or: journalctl --sync)
sudo systemctl kill --kill-who=main --signal=SIGRTMIN+1 systemd-journald
Journal Export and Import
# Export journal data
journalctl -o export > journal_export.txt
journalctl --since "2023-12-01" -o json > december_logs.json
# Binary export
journalctl -o export --output-fields=MESSAGE,PRIORITY,_SYSTEMD_UNIT > filtered_export.txt
# Import journal data (systemd-journal-remote)
sudo systemd-journal-remote --output=/var/log/journal/remote/ --split-mode=host --url=http://source-host:19531/
Traditional Log Files
rsyslog Configuration
# Main configuration file
cat /etc/rsyslog.conf
# Configuration includes
ls /etc/rsyslog.d/ # Additional configuration files
# Key configuration sections:
# Modules - input/output modules
# Global directives - global settings
# Rules - where to log what
# Example rules in rsyslog.conf:
mail.* /var/log/mail.log
auth,authpriv.* /var/log/auth.log
kern.* /var/log/kern.log
*.emerg :omusrmsg:*
Common Log Files Analysis
# System messages
tail -f /var/log/syslog # Ubuntu/Debian
tail -f /var/log/messages # CentOS/RHEL
# Authentication logs
tail -f /var/log/auth.log # Ubuntu/Debian
tail -f /var/log/secure # CentOS/RHEL
# Kernel messages
dmesg # Current kernel messages
dmesg -T # With timestamps
dmesg -f kern # Kernel facility only
dmesg --follow # Follow mode
# Boot messages
cat /var/log/boot.log
journalctl -b # Current boot (systemd)
# Mail logs
tail -f /var/log/mail.log # Mail system
tail -f /var/log/maillog # CentOS/RHEL
Log Rotation with logrotate
# Main configuration
cat /etc/logrotate.conf
# Service-specific configurations
ls /etc/logrotate.d/
# Example logrotate configuration
cat > /etc/logrotate.d/myapp << 'EOF'
/var/log/myapp/*.log {
daily
rotate 30
compress
delaycompress
missingok
notifempty
create 644 myapp myapp
postrotate
systemctl reload myapp 2>/dev/null || true
endscript
}
EOF
# Test logrotate configuration
sudo logrotate -d /etc/logrotate.d/myapp # Debug mode
sudo logrotate -f /etc/logrotate.d/myapp # Force rotation
# Manual logrotate run
sudo logrotate /etc/logrotate.conf
Log Analysis and Monitoring
Real-time Log Monitoring
# Multiple log files simultaneously
sudo multitail /var/log/syslog /var/log/auth.log /var/log/nginx/access.log
# Journal and syslog together
journalctl -f &
tail -f /var/log/syslog &
# Colored log viewing
sudo apt install ccze
tail -f /var/log/syslog | ccze -A
# Log filtering with grep
tail -f /var/log/syslog | grep -i error
journalctl -f | grep -E "(error|warning|critical)"
Log Analysis Scripts
#!/bin/bash
# log-analyzer.sh - Comprehensive log analysis
# Scans the systemd journal, syslog, service state, disk usage and network
# events, appending findings to a dated report and emailing an alert
# summary when thresholds are exceeded.

# Daily reports are written here, one file per day.
LOG_ANALYSIS_DIR="/var/log/analysis"
REPORT_FILE="$LOG_ANALYSIS_DIR/daily-report-$(date +%Y%m%d).txt"
# Recipient for alert summaries (used only if a `mail` command exists).
ALERT_EMAIL="admin@company.com"
mkdir -p "$LOG_ANALYSIS_DIR"

# Function to log analysis results
# Prints a timestamped line to stdout AND appends it to the daily report.
log_analysis() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$REPORT_FILE"
}
# System errors analysis
# Counts error-priority journal entries from the last 24h and today's
# syslog errors, appending the findings via log_analysis.
analyze_system_errors() {
  log_analysis "=== System Errors Analysis ==="

  # Journal errors from last 24 hours.
  # -q suppresses the "-- No entries --" placeholder line, which would
  # otherwise make `wc -l` report 1 error when there are none.
  local error_count
  error_count=$(journalctl -q --since "24 hours ago" -p err --no-pager | wc -l)
  log_analysis "Journal errors (24h): $error_count"

  if [ "$error_count" -gt 0 ]; then
    log_analysis "Recent system errors:"
    # IFS= / -r preserve leading whitespace and backslashes in log lines.
    journalctl -q --since "24 hours ago" -p err --no-pager | tail -10 | while IFS= read -r line; do
      log_analysis " $line"
    done
  fi

  # Syslog errors (Debian/Ubuntu path).
  if [ -f /var/log/syslog ]; then
    # %e gives a space-padded day ("Dec  5") matching the traditional
    # syslog timestamp; %d ("Dec 05") would miss days 1-9.
    # TODO(review): confirm against this host's rsyslog timestamp template.
    local syslog_errors
    syslog_errors=$(grep "$(date '+%b %e')" /var/log/syslog | grep -ci error)
    log_analysis "Syslog errors today: $syslog_errors"
  fi
}
# Authentication analysis
# Summarizes SSH login attempts and sudo usage over the last 24h; when
# failed SSH logins exceed 10, lists the top offending source addresses.
analyze_authentication() {
  log_analysis "=== Authentication Analysis ==="

  # SSH login attempts. -q keeps journalctl's "-- No entries --"
  # placeholder out of the pipeline; grep -c counts matches directly.
  local failed_ssh successful_ssh
  failed_ssh=$(journalctl -q --since "24 hours ago" -u ssh | grep -c "Failed password")
  successful_ssh=$(journalctl -q --since "24 hours ago" -u ssh | grep -c "Accepted password\|Accepted publickey")
  log_analysis "SSH failed logins (24h): $failed_ssh"
  log_analysis "SSH successful logins (24h): $successful_ssh"

  if [ "$failed_ssh" -gt 10 ]; then
    log_analysis "WARNING: High number of failed SSH attempts"
    # Field 11 is assumed to be the source IP in sshd's "Failed password"
    # message — position can shift with message variants; TODO confirm.
    journalctl -q --since "24 hours ago" -u ssh | grep "Failed password" | \
    awk '{print $11}' | sort | uniq -c | sort -nr | head -5 | while read -r count ip; do
      log_analysis " $ip: $count attempts"
    done
  fi

  # Sudo usage.
  local sudo_count
  sudo_count=$(journalctl -q --since "24 hours ago" | grep -c "sudo:")
  log_analysis "Sudo usage (24h): $sudo_count"
}
# Service analysis
# Reports currently failed units, counts start/stop events over 24h, and
# flags units with unusually many "Started" entries.
analyze_services() {
  log_analysis "=== Service Analysis ==="

  # Failed services.
  local failed_services
  failed_services=$(systemctl --failed --no-legend | wc -l)
  log_analysis "Currently failed services: $failed_services"

  if [ "$failed_services" -gt 0 ]; then
    log_analysis "Failed services:"
    # -r so names containing backslashes aren't mangled; `description`
    # soaks up the remainder of each line.
    systemctl --failed --no-legend | while read -r service load active sub description; do
      log_analysis " $service: $description"
    done
  fi

  # Service restarts.
  # NOTE(review): this counts both "Started" and "Stopped" journal lines,
  # so it is really a start/stop event count, not strict restarts.
  local restart_count
  restart_count=$(journalctl -q --since "24 hours ago" | grep -c "Started\|Stopped")
  log_analysis "Service restarts (24h): $restart_count"

  # High restart services (more than 5 "Started" lines in 24h).
  # Field 6 is assumed to be the unit name in the default journalctl
  # output — TODO confirm against this system's format.
  journalctl -q --since "24 hours ago" | grep "Started" | \
  awk '{print $6}' | sort | uniq -c | sort -nr | head -5 | while read -r count service; do
    if [ "$count" -gt 5 ]; then
      log_analysis " $service: $count restarts (high)"
    fi
  done
}
# Disk space analysis
# Flags filesystems above 80% (WARNING) or 90% (CRITICAL) usage.
analyze_disk_space() {
  log_analysis "=== Disk Space Analysis ==="
  # -P (POSIX format) guarantees one line per filesystem even for long
  # device names, so the fixed-field read below stays aligned.
  # The header line is skipped naturally: its 5th field "Use%" does not
  # match the numeric-percent regex.
  df -hP | while read -r filesystem size used avail percent mountpoint; do
    if [[ "$percent" =~ ^[0-9]+% ]]; then
      # Strip the trailing '%' with parameter expansion (no sed fork).
      percent_num=${percent%\%}
      if [ "$percent_num" -gt 90 ]; then
        log_analysis "CRITICAL: $mountpoint is ${percent} full"
      elif [ "$percent_num" -gt 80 ]; then
        log_analysis "WARNING: $mountpoint is ${percent} full"
      fi
    fi
  done
}
# Network analysis
# Counts network-interface and DNS-related error lines from the last 24h.
analyze_network() {
  log_analysis "=== Network Analysis ==="

  local interface_errors dns_errors
  # grep -ci counts matching lines directly (no extra wc process);
  # journalctl -q keeps the "-- No entries --" placeholder out.
  interface_errors=$(journalctl -q --since "24 hours ago" | grep -ci "network.*error\|interface.*down")
  log_analysis "Network errors (24h): $interface_errors"

  dns_errors=$(journalctl -q --since "24 hours ago" | grep -ci "dns.*fail\|resolution.*fail")
  log_analysis "DNS errors (24h): $dns_errors"
}
# Generate alert summary
# Counts CRITICAL/ERROR and WARNING lines in today's report; if any
# critical issue or more than 5 warnings exist, appends a summary and
# emails it (when a `mail` command is available).
generate_alert_summary() {
  local critical_issues warning_issues
  # -E uses POSIX ERE alternation; the original BRE '\|' form is a
  # GNU-grep extension and not portable.
  critical_issues=$(grep -cE "CRITICAL|ERROR" "$REPORT_FILE")
  warning_issues=$(grep -c "WARNING" "$REPORT_FILE")

  if [ "$critical_issues" -gt 0 ] || [ "$warning_issues" -gt 5 ]; then
    log_analysis "=== ALERT SUMMARY ==="
    log_analysis "Critical issues: $critical_issues"
    log_analysis "Warning issues: $warning_issues"
    # Send email alert if mail is available.
    if command -v mail >/dev/null 2>&1; then
      {
        echo "Daily Log Analysis Alert - $(hostname)"
        echo "Date: $(date)"
        echo "Critical issues: $critical_issues"
        echo "Warning issues: $warning_issues"
        echo ""
        echo "Full report: $REPORT_FILE"
        echo ""
        tail -20 "$REPORT_FILE"
      } | mail -s "Log Analysis Alert - $(hostname)" "$ALERT_EMAIL"
    fi
  fi
}
# Main analysis execution
log_analysis "Starting daily log analysis for $(date '+%Y-%m-%d')"
# Run each analysis section in turn; all of them append to $REPORT_FILE.
analyze_system_errors
analyze_authentication
analyze_services
analyze_disk_space
analyze_network
# Summarize and (optionally) email if alert thresholds were exceeded.
generate_alert_summary
log_analysis "Log analysis completed"
# Cleanup old reports (keep 30 days)
find "$LOG_ANALYSIS_DIR" -name "daily-report-*.txt" -mtime +30 -delete
Log Correlation and Pattern Detection
#!/bin/bash
# log-correlator.sh - Correlate events across different logs
# NOTE(review): CORRELATION_WINDOW is informational only — correlate_events
# hardcodes a +/-300 second window; keep the two in sync if either changes.
CORRELATION_WINDOW="5 minutes"
# Correlation reports are appended to a dated file under this directory.
OUTPUT_DIR="/var/log/correlation"
mkdir -p "$OUTPUT_DIR"
# Function to find correlated events
# Appends a +/-5-minute window of warning-and-above journal entries around
# a given event to the dated correlation log.
# Arguments: $1 timestamp (any format `date -d` accepts),
#            $2 event type label, $3 full original log line
correlate_events() {
  local timestamp=$1
  local event_type=$2
  local details=$3
  # Compute the dated log path once instead of re-running date per write.
  local logfile="$OUTPUT_DIR/correlation-$(date +%Y%m%d).log"

  # Convert timestamp to epoch for comparison; fall back to 0 on parse
  # failure so the script keeps running.
  local event_time window_start window_end
  event_time=$(date -d "$timestamp" '+%s' 2>/dev/null || echo "0")
  window_start=$((event_time - 300)) # 5 minutes before
  window_end=$((event_time + 300))   # 5 minutes after

  # Single grouped redirect: one open/append instead of six.
  {
    echo "=== Correlation Analysis for $event_type at $timestamp ==="
    echo "Event: $details"
    echo "Time window: $(date -d "@$window_start") to $(date -d "@$window_end")"
    # "@<secs>" is journalctl's epoch-seconds time syntax.
    journalctl --since "@$window_start" --until "@$window_end" -p warning | head -20
    echo "---"
  } >> "$logfile"
}
# Detect and correlate specific patterns
# Scans the last hour of the journal for known trouble patterns and runs
# correlation analysis around each matching line. The four original
# copy-pasted loops are folded into one helper.
detect_patterns() {
  # Helper: grep the last hour for a pattern and correlate each hit.
  # $1 = case-insensitive grep pattern, $2 = human-readable event label
  _scan_pattern() {
    local pattern=$1
    local label=$2
    local line timestamp
    # IFS= / -r preserve whitespace and backslashes in journal lines.
    journalctl -q --since "1 hour ago" | grep -i "$pattern" | while IFS= read -r line; do
      # Fields 1-3 of default journalctl output form "Mon DD HH:MM:SS";
      # TODO(review): confirm this matches the local output format.
      timestamp=$(echo "$line" | awk '{print $1, $2, $3}')
      correlate_events "$timestamp" "$label" "$line"
    done
  }

  _scan_pattern "high.*cpu\|cpu.*high" "High CPU"
  _scan_pattern "out of memory\|oom\|memory.*low" "Memory Issue"
  _scan_pattern "timeout\|connection.*failed" "Network Timeout"
  _scan_pattern "failed to start\|service.*failed" "Service Failure"
}
# Run pattern detection over the last hour of journal data.
detect_patterns

# Append a closing summary to today's correlation log.
summary_log="$OUTPUT_DIR/correlation-$(date +%Y%m%d).log"
{
  echo "=== Pattern Detection Summary ==="
  echo "Analysis completed at $(date)"
} >> "$summary_log"
Custom Log Monitoring Service
#!/bin/bash
# log-monitor-daemon.sh - Custom log monitoring daemon
# Periodically sweeps the systemd journal for the patterns below and
# raises alerts (syslog, optional email, optional hook script) when a
# pattern appears more than ALERT_THRESHOLD times in one interval.

PIDFILE="/var/run/log-monitor.pid"   # single-instance lock / PID record
LOGFILE="/var/log/log-monitor.log"   # the daemon's own activity log
CONFIG_FILE="/etc/log-monitor.conf"  # optional overrides, sourced below

# Default configuration
ALERT_THRESHOLD=10  # alert when a pattern exceeds this count per sweep
CHECK_INTERVAL=60   # seconds between journal sweeps
# Case-insensitive substrings searched for in the journal.
PATTERNS=(
"error"
"critical"
"failed"
"timeout"
"segfault"
)

# Load configuration if exists
# NOTE(review): sourcing executes arbitrary shell from the config file —
# ensure /etc/log-monitor.conf is root-owned and not world-writable.
if [ -f "$CONFIG_FILE" ]; then
source "$CONFIG_FILE"
fi
# Logging function
# Timestamped logger: writes to stdout and appends to $LOGFILE.
log_message() {
  local stamp
  stamp=$(date '+%Y-%m-%d %H:%M:%S')
  printf '[%s] %s\n' "$stamp" "$1" | tee -a "$LOGFILE"
}
# Pattern matching function
# Counts occurrences of $1 in the last CHECK_INTERVAL seconds of the
# journal; logs samples and fires send_alert when above ALERT_THRESHOLD.
check_patterns() {
  local pattern=$1
  local timeframe="$CHECK_INTERVAL seconds ago"
  local count

  # -q keeps journalctl's "-- No entries --" placeholder from being
  # counted as a match; grep -ci counts directly without a wc fork.
  count=$(journalctl -q --since "$timeframe" | grep -ci "$pattern")

  if [ "$count" -gt "$ALERT_THRESHOLD" ]; then
    log_message "ALERT: Pattern '$pattern' found $count times in last $CHECK_INTERVAL seconds"
    # Get sample entries for context.
    journalctl -q --since "$timeframe" | grep -i "$pattern" | head -5 | while IFS= read -r line; do
      log_message " Sample: $line"
    done
    # Send notification.
    send_alert "$pattern" "$count"
  fi
}
# Alert sending function
# Fans an alert out to syslog, email (when ALERT_EMAIL is set and `mail`
# exists) and an optional external hook (when ALERT_SCRIPT is executable).
send_alert() {
  local matched_pattern=$1
  local hit_count=$2

  # Always record the alert in syslog.
  logger -t log-monitor "ALERT: Pattern '$matched_pattern' detected $hit_count times"

  # Optional email notification.
  if [ -n "$ALERT_EMAIL" ] && command -v mail >/dev/null 2>&1; then
    echo "Log Monitor Alert: Pattern '$matched_pattern' detected $hit_count times on $(hostname)" | \
    mail -s "Log Monitor Alert" "$ALERT_EMAIL"
  fi

  # Optional custom alert hook.
  if [ -n "$ALERT_SCRIPT" ] && [ -x "$ALERT_SCRIPT" ]; then
    "$ALERT_SCRIPT" "$matched_pattern" "$hit_count"
  fi
}
# Daemon main loop
# Sweep every configured pattern, sleep, repeat until signalled.
daemon_loop() {
  local candidate
  log_message "Log monitor daemon started (PID: $$)"
  while :; do
    for candidate in "${PATTERNS[@]}"; do
      check_patterns "$candidate"
    done
    sleep "$CHECK_INTERVAL"
  done
}
# Signal handlers
# Shutdown handler: log, remove the pidfile, exit cleanly.
cleanup() {
  log_message "Log monitor daemon stopping"
  rm -f -- "$PIDFILE"
  exit 0
}
# SIGHUP handler: re-source the configuration file when present.
reload_config() {
  log_message "Reloading configuration"
  if [ -f "$CONFIG_FILE" ]; then
    # `.` is the POSIX spelling of `source`.
    . "$CONFIG_FILE"
    log_message "Configuration reloaded"
  fi
}
# Check if already running — done BEFORE installing the EXIT trap.
# In the original ordering, the "already running" exit fired the EXIT
# trap and cleanup deleted the LIVE daemon's pidfile.
# NOTE(review): kill -0 only proves *some* process owns that PID; a
# recycled PID from an unrelated process would also pass this check.
if [ -f "$PIDFILE" ]; then
  if kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
    echo "Log monitor daemon already running"
    exit 1
  else
    rm -f "$PIDFILE"
  fi
fi

# Set up signal handlers.
# cleanup is registered on EXIT only; TERM/INT simply exit so cleanup
# runs exactly once (the original `trap cleanup EXIT TERM INT` ran it
# twice on a signal: once for the signal, again for the EXIT it caused).
trap cleanup EXIT
trap 'exit 0' TERM INT
trap reload_config HUP

# Start daemon: record our PID, then block in the monitoring loop.
echo $$ > "$PIDFILE"
daemon_loop

# Usage:
# Start: ./log-monitor-daemon.sh &
# Stop: kill $(cat /var/run/log-monitor.pid)
# Reload: kill -HUP $(cat /var/run/log-monitor.pid)
Centralized Logging Setup
#!/bin/bash
# setup-centralized-logging.sh - Configure centralized logging
# Sets up one or more forwarding paths (rsyslog, journald, Filebeat) from
# this host to a central log server. Writes under /etc — run as root.

SYSLOG_SERVER="192.168.1.100"  # central syslog receiver
SYSLOG_PORT="514"              # port for the rsyslog forwarding action
# Configure rsyslog for centralized logging
# Writes a drop-in that forwards all syslog traffic to the central server
# over TCP with a disk-assisted queue for outage resilience.
setup_rsyslog_client() {
  echo "Configuring rsyslog client..."

  # Create client configuration.
  # Legacy $Action* directives apply to the NEXT action statement, so the
  # queue settings must come BEFORE the forwarding rule — in the original
  # they came after it and therefore never took effect.
  cat > /etc/rsyslog.d/50-centralized.conf << EOF
# Disk-assisted queue so logs survive network outages and shutdowns
\$ActionQueueFileName queue
\$ActionQueueMaxDiskSpace 1g
\$ActionQueueSaveOnShutdown on
\$ActionQueueType LinkedList
\$ActionResumeRetryCount -1
# Send all logs to central server (@@ = TCP; a single @ would be UDP)
*.* @@${SYSLOG_SERVER}:${SYSLOG_PORT}
EOF

  # Restart rsyslog to apply the new configuration.
  systemctl restart rsyslog
  echo "Rsyslog client configured"
}
# Configure journal forwarding
# Drops in a journald override that forwards entries to syslog (so the
# rsyslog path above also carries journal data).
setup_journal_forwarding() {
  echo "Configuring journal forwarding..."

  # The drop-in directory does not exist by default; without this the
  # original `cat >` failed with "No such file or directory".
  mkdir -p /etc/systemd/journald.conf.d

  # Configure journal to forward to syslog.
  cat > /etc/systemd/journald.conf.d/forward.conf << EOF
[Journal]
ForwardToSyslog=yes
MaxRetentionSec=1week
EOF

  # Restart journald to apply the drop-in.
  systemctl restart systemd-journald
  echo "Journal forwarding configured"
}
# Install and configure filebeat (Elastic Stack)
# Adds the Elastic APT repo, installs Filebeat non-interactively, writes
# its config and enables the service.
setup_filebeat() {
  echo "Setting up Filebeat..."

  # Install filebeat from the Elastic 7.x repository.
  # NOTE(review): apt-key is deprecated on current Debian/Ubuntu; prefer a
  # keyring file under /etc/apt/keyrings with a signed-by source entry.
  wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | apt-key add -
  echo "deb https://artifacts.elastic.co/packages/7.x/apt stable main" > /etc/apt/sources.list.d/elastic-7.x.list
  # -y: non-interactive — without it the script hangs at the apt prompt
  # when run unattended.
  apt update && apt install -y filebeat

  # Configure filebeat. Proper YAML nesting restored (the original
  # heredoc content was flat and therefore invalid YAML).
  cat > /etc/filebeat/filebeat.yml << EOF
filebeat.inputs:
- type: journald
  id: systemd-journal
- type: log
  enabled: true
  paths:
    - /var/log/*.log
    - /var/log/syslog
    - /var/log/auth.log
output.elasticsearch:
  hosts: ["elasticsearch-server:9200"]
setup.kibana:
  host: "kibana-server:5601"
processors:
- add_host_metadata:
    when.not.contains.tags: forwarded
EOF

  # Enable and start filebeat.
  systemctl enable filebeat
  systemctl start filebeat
  echo "Filebeat configured"
}
# Main setup: dispatch on the first command-line argument.
mode="${1:-}"
case "$mode" in
  rsyslog)
    setup_rsyslog_client
    ;;
  journal)
    setup_journal_forwarding
    ;;
  filebeat)
    setup_filebeat
    ;;
  all)
    # "all" deliberately covers rsyslog + journal only (see usage text).
    setup_rsyslog_client
    setup_journal_forwarding
    ;;
  *)
    echo "Usage: $0 {rsyslog|journal|filebeat|all}"
    echo " rsyslog - Configure rsyslog centralized logging"
    echo " journal - Configure journal forwarding"
    echo " filebeat - Configure Filebeat for Elastic Stack"
    echo " all - Configure rsyslog and journal"
    exit 1
    ;;
esac
Bu tutorial system logs va journalctl bo'yicha comprehensive guide beradi - basic log viewing'dan tortib advanced analysis, monitoring va centralized logging setup'i bilan birga.